perm filename ENET.C[11,HE]1 blob
sn#690525 filedate 1982-12-08 generic text, type C, neo UTF8
COMMENT ā VALID 00016 PAGES
C REC PAGE DESCRIPTION
C00001 00001
C00002 00002 /* enet.c CMU 1/18/80 */
C00018 00003
C00020 00004
C00023 00005
C00028 00006
C00036 00007
C00039 00008
C00044 00009
C00048 00010
C00058 00011
C00061 00012
C00063 00013
C00066 00014
C00068 00015
C00072 00016
C00077 ENDMK
Cā;
/* enet.c CMU 1/18/80 */
/*
* Ethernet interface driver
*
**********************************************************************
* HISTORY
* 10-Aug-82 Mike Accetta (mja) at Carnegie-Mellon University
* Added new EIOCMBIS and EIOCMBIC ioctl calls to set and clear
* bits in mode word; added mode bit ENHOLDSIG which suppresses
* the resetting of an enabled signal after it is sent (to be
 * used in conjunction with the SIGHOLD mechanism); changed
* EIOCGETP to zero pad word for future compatibility; changed enwrite()
* to enforce correct source host address on output packets (V3.05e).
*
* 01-Dec-81 Mike Accetta (mja) at Carnegie-Mellon University
* Fixed bug in timeout handling caused by missing "break" in the
* "switch" state check within enread(). This caused all reads
 * to be preceded by a bogus timeout. In addition, fixed another
* bug in signal processing by also recording process ID of
* process to signal when an input packet is available. This is
* necessary because it is possible for a process with an enabled
* signal to fork and exit with no guarantee that the child will
* reenable the signal. Thus under appropriately bizarre race
* conditions, an incoming packet to the child can cause a signal
* to be sent to the unsuspecting process which inherited the
* process slot of the parent. Of course, if the PID's wrap around
* AND the inheriting process has the same PID, well ... (V3.03d).
*
* 24-Nov-81 Mike Accetta (mja) at Carnegie-Mellon University
* Fixed bug in enrint() which failed to purge BDP before passing
* packet through input filters; fixed bug in EIOCSETF which
* failed to reinsert the changed filter at the proper position
* according to its priority; added some additional counters to
* keep track of various packet counts (V3.03c);
*
* 18-Nov-81 Mike Accetta (mja) at Carnegie-Mellon University
* Fixed bug which failed to check for zero length writes
* in enwrite() (V3.03b).
*
* 07-Oct-81 Mike Accetta (mja) at Carnegie-Mellon University
* Substantially rewritten for 4.1BSD and incorporating most ideas
* from previous changes by Bob Fitzgerald and Jeff Mogul below
* as well as:
* - support for new autoconfigure procedure and multiple units
* on multiple UBA's
* - UBA reset recovery
* - removal of 64K byte limit on buffers by using new ubaremap()
* routine to remap a small fixed number of registers on each
* I/O (only one buffer pool is now used as well)
* - support for both user and kernel mode access at the open, read,
* write, ioctl and close levels
* - fix to untimeout bug which could leave many pending timeouts
* hanging around after signals
* (V3.03a).
*
* 12-Aug-81 Bob Fitzgerald (rpf) at Carnegie-Mellon University
* - Changed packet queueing mechanisms to allow more (8)
* packets to be queued for the same open device and the
* same packet to be queued for more than one open device
* simultaneously. Done by changing the packet queueing
* mechanism from chaining through packet buffers to fixed
* size rings of packet buffer pointers.
* - Added scavenging in case free buffer queue is depleted.
* This is guaranteed to create free buffers as long as
* MINSCAVENGE > 0 (and we are not losing packet buffers).
* Scavenging is done automatically in DeQueueRFree.
* - Following Jeff Mogul@Stanford, split packet pool into
* separate transmit and receive pools, each of which is
* mapped onto the unibus independently. This saves
* mapping each buffer twice (once in each direction),
* thereby doubling the number of packets possible.
* - Simplified collision recovery mechanism (exponential
* backoff) by putting it in single centralized place
* rather than distributing it to each packet buffer.
* - Added 3 ioctl calls:
* EIOCSETW -- set maximum packet queueing for device
* EIOCFLSH -- discard all packets queued for device
* EIOCSTAT -- take a snapshot of status of all devices
* - Fixed a minor bug in enopen. It now checks for legality
* of minor device specification before checking to see
* if minor device is busy.
* - Converted OpenDescriptor.State -> OpenDescriptor.RecvState
* which is now an enumeration rather than a bit vector.
*
* 22-May-81 Bob Fitzgerald (rpf) at Carnegie-Mellon University
* Merged in some bug fixes due to Peter Eichenberger, installed
* by Jeffrey Mogul at Stanford.
* "Fixed unprotected critical sections which caused
* garbage packets to be sent and received packets
* to be lost. Fixed bugs in check that UNIBUS
 * addresses fall within first 64K bytes. Added check
* for illegal minor dev number in open."
*
 * 23-Apr-81 Mike Accetta (mja) at Carnegie-Mellon University
* Moved some structure declarations to enet.h to make them
 * available to pstat (V2.00g).
*
* 01-Nov-80 Mike Accetta (mja) at Carnegie-Mellon University
* Upgraded for Berkeley version 3.34; extracted NENET and
 * ENADDR definitions onto en.h; added support for FIONREAD ioctl
* call; removed support for FIOCCHKR ioctl call; added dummy
* reset routine for UNIBUS resets until they become a problem
* worth filling in the code for; fixed bug in recording process
* pointer for signals (V2.00).
*
* 04-Jun-80 Mike Accetta (mja) at Carnegie-Mellon University
* Changed to return immediately from read with EOF
* if no input packet is available and the timeout value is
* less than 0; also added EIOCENBS and EIOCINHS ioctl calls
* to enable and inhibit signals when an input packet is
* received (V1.07f).
*
* 01-Apr-80 Mike Accetta (mja) at Carnegie-Mellon University
* Changed back to using separate BDP's for input and output
 * packets; also added check that allocated unibus addresses fall
* within the first 64K bytes (V1.06b).
*
* 27-Feb-80 Mike Accetta (mja) at Carnegie-Mellon University
* Modified enread() to correctly interpret zero length reads from
* the FIOCCHKR ioctl call; added EIOCGETP, EIOCSETP and EIOCSETF
* as synonyms for TIOCGETP, TIOCSETP, and TIOCSETD so that the
* tty versions may eventually be removed; added check for minor
* device below HIPRIDEV when setting priorities above HIPRI
* (V1.05a).
*
* 22-Feb-80 Rick Rashid (rfr) at Carnegie-Mellon University
* Rewritten to provide multiple user access via user settable
* filters (V1.05).
*
* 18-Jan-80 Mike Accetta (mja) at Carnegie-Mellon University
* Created (V1.00).
*
**********************************************************************
*/
#include "enet.h"
#define NEN NENET
#if NEN > 0
#include "hparam.h"
#include "hsystm.h"
#include "hdir.h"
#include "huser.h"
#include "hmap.h"
#include "hpte.h"
#include "hbuf.h"
#include "hubar.h"
#include "hubav.h"
#include "hproc.h"
#include "htty.h"
#include "hqueue.h"
#include "henet.h"
#include "hconf.h"
#define DEBUG 3 /* compiled-in debug level; enables the enprintf traces below */
/*
 * Conditional trace printf.  NOTE: expands to a bare "if" statement,
 * so a call must never be followed directly by an "else".
 */
#define enprintf(flags) if (enDebug&(flags)) printf
/* NOTE: evaluates both arguments twice -- no side effects in callers */
#define min(a,b) ( ((a)<=(b)) ? (a) : (b) )
#define PRINET 26 /* sleep priority for net waits; interruptible (> PZERO) */
/*
 * 'enQueueElts' is the pool of packet headers used by the driver.
 * 'enPackets' is the pool of packets used by the driver (these should
 * be allocated dynamically when this becomes possible).
 * 'enFreeq' is the queue of available packets
 * 'enState' is the driver state table per logical unit number
 * 'enUnit' is the physical unit number table per logical unit number;
 * the first "attach"ed ethernet is logical unit 0, etc.
 * 'enFreeqMin' is the minimum number of packets ever in the free queue
 * (for statistics purposes)
 * 'enAllocCount'is the number of packets allocated to external kernel
 * applications
 * 'enScavenges' is the number of scavenges of the active input queues
 * (for statistics purposes)
 * 'enDebug' is a collection of debugging bits which enable trace and/or
 * diagnostic output as follows:
 * 000001 - routine call trace (under DEBUG)
 * 000002 - open descriptor queue trace (under DEBUG)
 * 000004 - input error, collision and dropped packet
 * diagnostics
 */
struct enPacket enQueueElts[ENPACKETS]; /* packet header pool */
short enPackets[ENPACKETS][ENPACKETSIZE]; /* packet data pool, 16-bit words */
struct enQueue enFreeq; /* free packet queue (shared by all units) */
struct enState enState[NEN]; /* per-logical-unit driver state */
char enUnit[NEN]; /* logical -> physical unit map */
int enFreeqMin = ENPACKETS; /* low-water mark of enFreeq */
int enAllocCount = 0; /* packets lent to kernel clients */
int enScavenges = 0; /* scavenge pass counter */
int enDebug = 0; /* debug bit mask, see above */
/*
 * Forward declarations for subroutines which return other
 * than integer types.
 */
extern struct Queue *dequeue();
extern boolean enFilter();
/*
 * Auto-configure declarations
 * (note that the driver indirects physical unit numbers through a
 * table to generate logical unit numbers. This is basically used
 * to allow one configuration file to be used on all systems while
 * the device address may be at either of two locations.)
 */
extern int enprobe(), enattach(), enrint(), enxint();
struct uba_device *endinfo[NEN]; /* filled in by autoconfigure */
u_short enstd[] = { 0160020, 0164000 }; /* candidate UNIBUS CSR addresses */
struct uba_driver enetdriver =
{ enprobe, 0, enattach, 0, enstd, "enet", endinfo };
/*
 * forAllOpenDescriptors(p) -- a macro for iterating
 * over all currently open devices. Use it in place of
 * "for ( ...; ... ; ... )"
 * and supply your own loop body. The loop variable is the
 * parameter p which is set to point to the descriptor for
 * each open device in turn.
 *
 * (Walks the enDesq descriptor queue; enDesq is presumably a
 * per-unit macro over a local enStatep -- confirm against enet.h.)
 */
#define forAllOpenDescriptors(p) \
for ((p) = (struct enOpenDescriptor *)enDesq.enQ_F; \
(struct Queue *)(&enDesq) != &((p)->enOD_Link); \
(p) = (struct enOpenDescriptor *)(p)->enOD_Link.F)
/*
 * enInitQueue - initialize ethernet queue
 *
 * (compound-statement macro: use only where a block is legal)
 */
#define enInitQueue(q) \
{ \
initqueue((struct queue *)(q)); \
(q)->enQ_NumQueued = 0; \
}
/*
 * enEnqueue - add an element to a queue
 *
 * (NOTE: evaluates q twice; callers pass simple lvalues)
 */
#define enEnqueue(q, elt) \
{ \
enqueue((struct queue *)(q), (struct queue *)(elt)); \
(q)->enQ_NumQueued++; \
}
/*
 * enFlushQueue - drain every packet from a queue, returning to the
 * free pool any packet whose reference count drops to zero.
 *
 * The caller must already be at high IPL; nothing else protects the
 * queue while it is being emptied.
 */
enFlushQueue(q)
register struct enQueue *q;
{
	register struct enPacket *pkt;

	for (;;)
	{
		pkt = (struct enPacket *)dequeue((struct queue *)q);
		if (pkt == NULL)
			break;
		if (--(pkt->enP_RefCount) == 0)
		{
			enEnqueue(&enFreeq, pkt);
		}
	}
	q->enQ_NumQueued = 0;
}
/*
 * enInitWaitQueue - reset a packet wait queue to the empty state,
 * with the default maximum queueing limit.
 */
enInitWaitQueue(wq)
register struct enWaitQueue *wq;
{
	wq->enWQ_MaxWaiting = ENDEFWAITING;
	wq->enWQ_NumQueued = 0;
	wq->enWQ_Tail = 0;
	wq->enWQ_Head = 0;
}
/*
 * enEnWaitQueue - add a packet to a wait queue
 *
 * Caller must be at high IPL and must already have checked the
 * enWQ_MaxWaiting limit (see enInputDone); no overflow check is
 * made here.
 */
enEnWaitQueue(wq, p)
register struct enWaitQueue *wq;
struct enPacket *p;
{
/* store at the tail slot first, then advance the tail index
 * (enNextWaitQueueIndex modifies its argument in place) */
wq->enWQ_Packets[wq->enWQ_Tail] = p;
wq->enWQ_NumQueued++;
enNextWaitQueueIndex(wq->enWQ_Tail);
}
/*
 * enDeWaitQueue - remove and return the oldest packet from a wait queue
 *
 * Caller must be at high IPL and must know the queue is non-empty;
 * underflow indicates a driver bug and panics.
 */
struct enPacket *
enDeWaitQueue(wq)
register struct enWaitQueue *wq;
{
struct enPacket *p;
wq->enWQ_NumQueued--;
if (wq->enWQ_NumQueued < 0)
panic("enDeWaitQueue");
/* take from the head; enNextWaitQueueIndex advances its argument in place */
p = wq->enWQ_Packets[wq->enWQ_Head];
enNextWaitQueueIndex(wq->enWQ_Head);
return(p);
}
/*
 * enTrimWaitQueue - cut a wait queue back to size
 *
 * Discards the NEWEST packets (from the tail) until at most
 * 'threshold' remain, freeing any whose reference count drops to
 * zero.  'threshold' is an implicit int (K&R default).  Caller must
 * be at high IPL.
 */
enTrimWaitQueue(wq, threshold)
register struct enWaitQueue *wq;
{
register int Counter = (wq->enWQ_NumQueued - threshold);
register struct enPacket *p;
#ifdef DEBUG
enprintf(1)("enTrimWaitQueue(%x, %d): %d\n", wq, threshold, Counter);
#endif
while (Counter-- > 0)
{
wq->enWQ_NumQueued--;
/* back the tail up one slot; enPrevWaitQueueIndex modifies in place */
enPrevWaitQueueIndex(wq->enWQ_Tail);
p = wq->enWQ_Packets[wq->enWQ_Tail];
if (0 == --(p->enP_RefCount))
{
enEnqueue(&enFreeq, p);
}
}
}
/*
 * enFlushWaitQueue - remove all packets from wait queue
 */
#define enFlushWaitQueue(wq) enTrimWaitQueue(wq, 0)
/*
 * scavenging thresholds:
 *
 * index by number of active files; for N open files, each queue may retain
 * up to 1/Nth of the packets not guaranteed to be freed on scavenge. The
 * total number of available packets is computed less any which are currently
 * allocated to other kernel applications and also less those currently
 * on the transmit queues.
 *
 * (assumes high IPL)
 */
char enScavLevel[(NEN*ENMAXOPENS)+1]; /* per-queue retention limit, indexed by total open count */
/*
 * enInitScavenge -- set up ScavLevel table
 *
 * Recomputes the per-open-count retention limits from the number of
 * packets not currently lent to kernel clients or sitting on transmit
 * queues.  Assumes high IPL.
 */
enInitScavenge()
{
register int PoolSize = (ENPACKETS-enAllocCount-ENMINSCAVENGE);
register int i = (NEN*ENMAXOPENS)+1;
register struct enState *enStatep;
/* enXmitq is presumably a per-unit macro expanding through enStatep
 * (the loop variable is otherwise unused) -- confirm against enet.h */
for (enStatep=enState; enStatep < &enState[NEN]; enStatep++)
PoolSize -= enXmitq.enQ_NumQueued;
while (--i>0)
enScavLevel[i] = (PoolSize / i);
}
/*
 * enScavenge -- scan all OpenDescriptors for all ethernets, releasing
 * any queued buffers beyond the prescribed limit and freeing any whose
 * refcounts drop to 0.
 * Assumes caller is at high IPL so that it is safe to modify the queues.
 */
enScavenge()
{
register struct enOpenDescriptor *d;
register int threshold = 0;
register struct enState *enStatep;
/* total open files across all units (enCurOpens is presumably a
 * per-unit macro over enStatep -- confirm against enet.h) */
for (enStatep=enState; enStatep < &enState[NEN]; enStatep++)
threshold += enCurOpens;
threshold = enScavLevel[threshold];
/* recalculate thresholds based on current allocations */
enInitScavenge();
enScavenges++;
#ifdef DEBUG
enprintf(1)("enScavenge: %d\n", threshold);
#endif
for (enStatep=enState; enStatep < &enState[NEN]; enStatep++)
{
if (enDesq.enQ_F == 0)
continue; /* never initialized */
forAllOpenDescriptors(d)
{
enTrimWaitQueue(&(d->enOD_Waiting), threshold);
}
}
}
/*
 * enAllocatePacket - allocate the next packet from the free list
 *
 * Assumes IPL is at high priority so that it is safe to touch the
 * packet queue. If the queue is currently empty, scavenge for
 * more packets.
 *
 * Panics if even scavenging cannot produce a packet (the scavenge
 * thresholds are supposed to guarantee at least ENMINSCAVENGE
 * reclaimable buffers).  Also tracks the free-queue low-water mark.
 */
struct enPacket *
enAllocatePacket()
{
register struct enPacket *p;
if (0 == enFreeq.enQ_NumQueued)
enScavenge();
p = (struct enPacket *)dequeue((struct queue *)&enFreeq);
if (p == NULL)
panic("enAllocatePacket");
/* maintain the minimum-ever statistic for pstat */
if (enFreeqMin > --enFreeq.enQ_NumQueued)
enFreeqMin = enFreeq.enQ_NumQueued;
return(p);
}
/*
 * enDeallocatePacket - place the packet back on the free packet queue
 *
 * (High IPL assumed).
 *
 * Compound-statement macro (not do {} while (0)); callers only use it
 * as a full statement, never as the if-arm of an if/else.
 */
#define enDeallocatePacket(p) \
{ \
enqueue((struct queue *)&enFreeq, (struct queue *)(p)); \
enFreeq.enQ_NumQueued++; \
}
/*
 * enopen - open ether net device
 *
 * Callable from user or kernel mode. Bit ENKERNEL of the flag argument
 * indicates whether or not the open is being done by the kernel. This
 * bit is remembered for later use by read, write, and ioctl.
 *
 * Errors: ENXIO - illegal minor device number
 * EBUSY - minor device already in use
 * ENOMEM - unable to allocate low 64K UBA memory
 */
/* ARGSUSED */
enopen(dev, flag)
{
register int md = ENINDEX(dev);
register struct enState *enStatep;
register struct uba_device *ui;
int ipl;
/*
 * Each open enet file has a different minor device number.
 * In general, a prospective net user must try successively
 * to open the devices 'enet', 'enet1', ... 'enetn' where n
 * is MAXOPENS-1. This is not elegant, but UNIX will call
 * open for each new open file using the same inode but calls
 * close only when the last open file referring to the inode
 * is released. This means that we cannot know inside the
 * driver code when the resources associated with a particular
 * open of the same inode should be deallocated. Thus, we have
 * simply made up a number of different inodes to represent the
 * simultaneous opens of the ethernet. Each of these has
 * a different minor device number.
 *
 * When opening an ethernet device in the kernel, simply iterate
 * through minor device numbers until an open fails to return
 * EBUSY.
 */
#ifdef DEBUG
enprintf(1)("enopen(%d, %x):\n", md, flag);
#endif
/* check for illegal minor dev */
if (md >= ENMAXOPENS
|| (ui=endinfo[ENUNIT(dev)]) == 0
|| ui->ui_alive == 0)
{
u.u_error = ENXIO;
return;
}
enStatep = &enState[ENUNIT(dev)];
/* enOpenFlag/enDesq/enRP below are presumably per-unit macros over
 * enStatep (see enet.h) -- confirm */
if (enOpenFlag[md])
{
u.u_error = EBUSY;
return;
}
enOpenFlag[md] = TRUE;
/* initialize unit on first open */
if (enDesq.enQ_F == 0)
{
enInit(enStatep, ui);
if (u.u_error)
{
enOpenFlag[md] = FALSE;
return;
}
}
/* start the receiver if it is not already posted */
ipl = spl6();
if (enRP == NULL)
enrstart(ui);
splx(ipl);
enprintf(2)("enopen: Desq: %x, %x\n", enDesq.enQ_F, enDesq.enQ_B);
enInitDescriptor(&enAllDescriptors[md], flag);
enInsertDescriptor(&(enDesq), &enAllDescriptors[md]);
}
/*
 * enInit - initialize ethernet unit
 *
 * Allocates the unit's UNIBUS resources, resets the per-unit driver
 * state, and (on the very first unit ever initialized) builds the
 * global free packet pool.  Sets u.u_error to ENOMEM and returns
 * holding no UBA resources if the mapping cannot be placed within the
 * low 64K bytes the interface can address.
 */
enInit(enStatep, ui)
register struct enState *enStatep;
register struct uba_device *ui;
{
#ifdef DEBUG
enprintf(1)("enInit(%x, %x):\n", enStatep, ui);
#endif
/*
 * Allocate unibus BDP and mapping registers to be used for all
 * subsequent transfers.  The mapped address is arbitrary since
 * we remap every I/O.  The low order (byte offset) bits must be zero,
 * however, since ubaremap() neglects to clear them from the old
 * resource values when remapping.
 *
 * Since the ethernet interface ignores the extended
 * memory address bits, we must insure that the
 * allocated portions of the unibus map are all in
 * the first 64K before proceeding.
 *
 * We can't afford to sleep in uballoc() since someone else might
 * come along and open another minor device before we finished
 * the initialization.  We could fix this by blocking opens
 * while initializing but this hardly seems worth it since if
 * UBA resources are so scarce, chances are good we won't get
 * allocated to the low 64K anyway.
 */
enRinfo = uballoc(ui->ui_ubanum, (caddr_t)0x80000000, ENUBAALLOCSIZE, UBA_NEEDBDP|UBA_CANTWAIT);
enXinfo = uballoc(ui->ui_ubanum, (caddr_t)0x80000000, ENUBAALLOCSIZE, UBA_NEEDBDP|UBA_CANTWAIT);
/*
 * check that the ENDs of the UNIBUS spaces allocated
 * are both inside the 64K byte addressing limit of the
 * interface
 */
if ( ((enRinfo + ENUBAALLOCSIZE)&0x30000) ||
((enXinfo + ENUBAALLOCSIZE)&0x30000) ||
enRinfo == 0 || enXinfo == 0 )
{
/*
 * BUG FIX: the enXinfo release below was previously swallowed by a
 * comment that spanned both lines, so the transmit UBA resources
 * leaked on this failure path.  ubarelse() allows the info words
 * to be zero and just returns, so both calls are always safe.
 */
ubarelse(ui->ui_ubanum, &enRinfo);
ubarelse(ui->ui_ubanum, &enXinfo);
u.u_error = ENOMEM;
return;
}
enRerrors = 0;
enXerrors = 0;
enRand = time; /* seed for transmit backoff delays */
enXP = NULL; /* no transmit in progress */
enRP = NULL; /* no receive posted */
/* initialize free queue on first unit */
if (enFreeq.enQ_F == 0)
{
register int i;
initqueue((struct queue *)&enFreeq);
for (i=0; i<ENPACKETS; i++)
{
register struct enPacket *p;
p = &enQueueElts[i];
p->enP_Func = NULL;
p->enP_RefCount = 0;
p->enP_Data = (u_short *)&enPackets[i][0];
enDeallocatePacket(p);
}
}
initqueue((struct queue *)&enXmitq);
initqueue((struct queue *)&enDesq);
}
/*
 * enclose - ether net device close routine
 *
 * Callable from user or kernel mode.
 *
 * Unlinks the open descriptor and flushes its wait queue; when the
 * last open file goes away, shuts the interface down and releases the
 * in-progress transmit/receive packets (UBA resources are kept).
 */
/* ARGSUSED */
enclose(dev, flag)
{
register int md = ENINDEX(dev);
register struct enState *enStatep = &enState[ENUNIT(dev)];
register struct uba_device *ui = endinfo[ENUNIT(dev)];
register struct enOpenDescriptor *d = &enAllDescriptors[md];
int ipl;
enOpenFlag[md] = FALSE;
#ifdef DEBUG
enprintf(1)("enclose(%d, %x):\n", md, flag);
#endif
/*
 * insure that receiver doesn't try to queue something
 * for the device as we are decommissioning it.
 * (I don't think this is necessary, but I'm a coward.)
 */
ipl = spl6();
/* unlink d from the descriptor queue via its predecessor link */
dequeue((struct queue *)d->enOD_Link.B);
enCurOpens--;
enprintf(2)("enclose: Desq: %x, %x\n", enDesq.enQ_F, enDesq.enQ_B);
enFlushWaitQueue(&(d->enOD_Waiting));
splx(ipl);
if (enCurOpens == 0)
{
/*
 * All done. No enet files are now open. We can
 * close down shop completely. We don't bother to
 * deallocate the UNIBUS resources though, since
 * they are short and they may not get allocated in
 * the first 64K bytes next time around.
 */
ipl = spl6();
ENADDR->enrcsr = 0;
ENADDR->enxcsr = 0;
enFlushQueue(&enXmitq);
if (enXP != NULL)
{
enDeallocatePacket(enXP);
enXP = NULL;
}
if (enRP != NULL)
{
enDeallocatePacket(enRP);
enRP = NULL;
}
splx(ipl);
}
}
/*
 * enread - read next packet from net
 *
 * Callable from user or kernel mode (checks OD_Flag)
 *
 * Blocks (subject to the descriptor's timeout) until a packet passes
 * this descriptor's filter.  A negative timeout means return at once
 * (EOF) when no packet is waiting.  For kernel opens the packet is
 * handed back through *pp and must later be released via the
 * EIOCDEALLOCP ioctl; for user opens the data is copied out and the
 * packet reference dropped.
 */
/* VARARGS */
enread(dev, pp)
dev_t dev;
struct enPacket **pp;
{
register struct enState *enStatep = &enState[ENUNIT(dev)];
register struct enOpenDescriptor *d = &enAllDescriptors[ENINDEX(dev)];
register struct enPacket *p;
int ipl;
extern enTimeout(), enrstart();
#if DEBUG
enprintf(1)("enread(%x):", dev);
#endif
ipl = spl6();
/*
 * If nothing is on the queue of packets waiting for
 * this open enet file, then set timer and sleep until
 * either the timeout has occurred or a packet has
 * arrived.
 */
while (0 == d->enOD_Waiting.enWQ_NumQueued)
{
/* negative timeout: poll-style read, return EOF immediately */
if (d->enOD_Timeout < 0)
{
splx(ipl);
return;
}
if (d->enOD_Timeout)
{
/*
 * If there was a previous timeout pending for this file,
 * cancel it before setting another. This is necessary since
 * a cancel after the sleep might never happen if the read is
 * interrupted by a signal.
 */
if (d->enOD_RecvState == ENRECVTIMING)
untimeout(enTimeout, (caddr_t)d);
timeout(enTimeout, (caddr_t)d, d->enOD_Timeout);
d->enOD_RecvState = ENRECVTIMING;
}
else
d->enOD_RecvState = ENRECVIDLE;
sleep((caddr_t)d, PRINET);
/* awake: decide whether a packet arrived or the timer fired */
switch (d->enOD_RecvState)
{
case ENRECVTIMING:
{
/* packet arrived first -- cancel the pending timeout */
untimeout(enTimeout, (caddr_t)d);
d->enOD_RecvState = ENRECVIDLE;
break;
}
case ENRECVTIMEDOUT:
{
/* timer fired -- return EOF */
splx(ipl);
return;
}
}
}
p = enDeWaitQueue(&(d->enOD_Waiting));
splx(ipl);
/*
 * Kernel mode read
 *
 * Return pointer to packet. It must be subsequently freed via an
 * EIOCDEALLOCP ioctl() call and may not be changed since it may
 * have also been on some other wait queue.
 *
 * (we don't use any fields of the U area since we can't guarantee
 * our context).
 */
if (d->enOD_Flag & ENKERNEL)
{
enAllocCount++; /* packet allocated to kernel */
*pp = p;
return;
}
/*
 * User mode read
 *
 * Move data from packet into user space. Throw away
 * any data left over if u.u_count is less than
 * the number of 16-bit words in the packet.
 */
iomove((caddr_t)(p->enP_Data),
min((p->enP_WordCount)<<1, u.u_count),
B_READ);
ipl = spl6();
if (0 == --(p->enP_RefCount))
{
enDeallocatePacket(p); /* else release buffer */
}
splx(ipl);
}
/*
 * enwrite - write next packet to net
 *
 * Callable from user or kernel mode (checks OD_Flag).
 *
 * User writes copy the data into a driver packet (blocking while the
 * transmit queue is full); kernel writes transmit the caller's packet
 * directly.  The packet is either handed to the idle transmitter or
 * queued behind it.
 */
/* VARARGS */
enwrite(dev, ep)
dev_t dev;
register struct enPacket *ep;
{
register struct enState *enStatep = &enState[ENUNIT(dev)];
register struct enOpenDescriptor *d = &enAllDescriptors[ENINDEX(dev)];
register struct uba_device *ui = endinfo[ENUNIT(dev)];
register struct enPacket *p;
register u_int ByteCount;
int ipl;
extern enxstart();
#if DEBUG
enprintf(1)("enwrite(%x):\n", dev);
#endif
/*
 * User mode write
 *
 * Allocate packet for transmit. Block if too many transmit packets
 * have already been allocated until some are freed.
 *
 * Copy user data into packet (if ubaremap() handled user page tables
 * we might be able to get away with direct mapping without copying here).
 */
if ((d->enOD_Flag&ENKERNEL) == 0)
{
/* zero length writes are no-ops (see 18-Nov-81 history entry) */
if (u.u_count == 0)
return;
ipl = spl6();
while (enXmitq.enQ_NumQueued >= ENXPACKETS)
sleep((caddr_t)&enXmitq, PRINET);
p = enAllocatePacket();
p->enP_RefCount++;
splx(ipl);
/* copy at most one packet's worth; excess user data is dropped */
iomove((caddr_t)(p->enP_Data),
ByteCount=min(ENPACKETSIZE<<1, u.u_count),
B_WRITE);
p->enP_WordCount = (ByteCount+1)>>1;
}
else
/*
 * Kernel mode write
 *
 * Use packet supplied by caller.
 *
 * (Again, we avoid using any fields of the U area since we may be called
 * in interrupt context).
 */
{
p = ep;
}
/*
 * Enforce correct source host
 * (first data byte is forced to this interface's address; the
 * complement matches t.en_addr as reported by EIOCGETP)
 */
*((u_char *)(p->enP_Data)) = ~(ENADDR->enaddr);
ipl = spl6();
if (enXmitqMax < ++enXmitq.enQ_NumQueued)
enXmitqMax = enXmitq.enQ_NumQueued;
if (NULL == enXP) /* if transmitter is idle */
{
enXmitq.enQ_NumQueued--;
enXP = p; /* use buffer to */
enMask = -1;
enxstart(ui); /* start write */
}
else
enqueue((struct queue *)&enXmitq, (struct queue *)p); /* just queue it */
splx(ipl);
}
/*
 * enioctl - ether net control
 *
 * Callable from user or kernel mode (checks OD_Flag)
 *
 * EIOCGETP - get ethernet parameters
 * EIOCSETP - set ethernet read timeout
 * EIOCSETF - set ethernet read filter
 * EIOCENBS - enable signal when read packet available
 * EIOCINHS - inhibit signal when read packet available
 * FIONREAD - check for read packet available
 * EIOCSETW - set maximum read packet waiting queue length
 * EIOCFLUSH - flush read packet waiting queue
 * EIOCALLOCP - allocate packet (kernel only)
 * EIOCDEALLOCP - deallocate packet (kernel only)
 *
 * encopyin() copies data from caller to driver based on mode of open
 * encopyout() copies data from driver to caller based on mode of open
 *
 * NOTE: both macros expand to an if/else statement and may return
 * from the enclosing function on an EFAULT; they must only be used
 * as full statements (never directly before an "else").
 */
#define encopyin(from, to, size) \
if (d->enOD_Flag&ENKERNEL) \
bcopy(from, to, size); \
else \
if (copyin(from, to, size)) \
{ \
u.u_error = EFAULT; \
return; \
}
#define encopyout(from, to, size) \
if (d->enOD_Flag&ENKERNEL) \
bcopy(from, to, size); \
else \
if (copyout(from, to, size)) \
{ \
u.u_error = EFAULT; \
return; \
}
/* ARGSUSED */
enioctl(dev, cmd, addr, flag)
caddr_t addr;
dev_t flag;
{
register struct uba_device *ui = endinfo[ENUNIT(dev)];
register struct enState *enStatep = &enState[ENUNIT(dev)];
register struct enOpenDescriptor * d = &enAllDescriptors[ENINDEX(dev)];
int ipl;
#if DEBUG
enprintf(1)("enioctl(%x, %x, %x, %x):\n", dev, cmd, addr, flag);
#endif
/* NOTE: "endcase" is presumably a break macro from enet.h -- confirm */
switch (cmd)
{
/*
 * Get driver parameters (limits, timeout, interface address)
 */
case EIOCGETP:
{
struct eniocb t;
t.en_maxwaiting = ENMAXWAITING;
t.en_maxpriority = (ENINDEX(dev) < ENHIPRIDEV)?(ENHIPRI-1):ENMAXPRI;
t.en_rtout = d->enOD_Timeout;
t.en_addr = ~(ENADDR->enaddr);
t.en_maxfilters = ENMAXFILTERS;
t.en_pad1 = 0; /* for future expansion */
encopyout((caddr_t)&t, addr, sizeof t);
}
endcase
/*
 * Set the read timeout (only en_rtout of the block is used)
 */
case EIOCSETP:
{
struct eniocb t;
encopyin(addr, (caddr_t)&t, sizeof t);
d->enOD_Timeout = t.en_rtout;
}
endcase
/*
 * Install a new input filter and reposition the descriptor in the
 * priority-ordered queue
 */
case EIOCSETF:
{
struct enfilter f;
encopyin(addr, (caddr_t)&f, sizeof f);
/* high priorities are reserved for the low minor devices */
if ((ENINDEX(dev) < ENHIPRIDEV && f.enf_Priority >= ENHIPRI) ||
f.enf_FilterLen > ENMAXFILTERS)
{
u.u_error = EINVAL;
return;
}
/* insure that filter is installed indivisibly */
ipl = spl6();
bcopy((caddr_t)&f, (caddr_t)&(d->enOD_OpenFilter), sizeof f);
dequeue((struct queue *)d->enOD_Link.B);
enDesq.enQ_NumQueued--;
enInsertDescriptor(&(enDesq), d);
splx(ipl);
}
endcase
/*
 * Enable signal n on input packet
 */
case EIOCENBS:
{
union {int n; int (*func)();} un;
encopyin(addr, (caddr_t)&un, sizeof un);
if ((d->enOD_Flag & ENKERNEL) == 0)
{
if (un.n < NSIG)
{
/* record both proc pointer and pid so a recycled proc
 * slot is not signalled (see 01-Dec-81 history entry) */
d->enOD_SigProc = u.u_procp;
d->enOD_SigPid = u.u_procp->p_pid;
d->enOD_SigNumb = un.n; /* This must be set last */
}
else
{
goto bad;
}
}
else
{
/* kernel opens register a callback instead of a signal */
d->enOD_SigFunc = un.func;
d->enOD_SigNumb = NSIG; /* This must be set last */
}
}
endcase
/*
 * Disable signal on input packet
 */
case EIOCINHS:
{
if ((d->enOD_Flag & ENKERNEL) == 0)
{
d->enOD_SigNumb = 0;
}
else
{
d->enOD_SigFunc = NULL;
}
}
endcase
/*
 * Check for packet waiting
 */
case FIONREAD:
{
int n;
register struct enWaitQueue *wq;
ipl = spl6();
/* report the byte count of the packet at the head, if any */
if ((wq = &(d->enOD_Waiting))->enWQ_NumQueued)
n = (wq->enWQ_Packets[wq->enWQ_Head]->enP_WordCount)<<1;
else
n = 0;
splx(ipl);
encopyout((caddr_t)&n, addr, sizeof n);
}
endcase
/*
 * Set maximum recv queue length for a device
 */
case EIOCSETW:
{
unsigned un;
encopyin(addr, (caddr_t)&un, sizeof un);
/*
 * unsigned un MaxQueued
 * ---------------- ------------
 * 0 -> DEFWAITING
 * 1..MAXWAITING -> un
 * MAXWAITING..-1 -> MAXWAITING
 */
d->enOD_Waiting.enWQ_MaxWaiting = (un) ? min(un, ENMAXWAITING)
: ENDEFWAITING;
}
endcase
/*
 * Flush all packets queued for a device
 */
case EIOCFLUSH:
{
ipl = spl6();
enFlushWaitQueue(&(d->enOD_Waiting));
splx(ipl);
}
endcase
/*
 * Set mode bits
 */
case EIOCMBIS:
{
u_short mode;
encopyin(addr, (caddr_t)&mode, sizeof mode);
if (mode&ENPRIVMODES)
u.u_error = EINVAL;
else
d->enOD_Flag |= mode;
break;
}
/*
 * Clear mode bits
 */
case EIOCMBIC:
{
u_short mode;
encopyin(addr, (caddr_t)&mode, sizeof mode);
if (mode&ENPRIVMODES)
u.u_error = EINVAL;
else
d->enOD_Flag &= ~mode;
break;
}
/*
 * Allocate an ethernet packet (kernel only)
 */
case EIOCALLOCP:
{
register struct enPacket *p;
if ((d->enOD_Flag&ENKERNEL) == 0)
goto bad;
ipl = spl6();
p = enAllocatePacket();
p->enP_RefCount++;
enAllocCount++;
splx(ipl);
*(struct enPacket **)addr = p;
}
endcase
/*
 * Deallocate an ethernet packet (kernel only)
 */
case EIOCDEALLOCP:
{
register struct enPacket *p;
if ((d->enOD_Flag & ENKERNEL) == 0)
goto bad;
p = *(struct enPacket **)addr;
ipl = spl6();
enAllocCount--;
if (--(p->enP_RefCount) == 0)
enDeallocatePacket(p);
splx(ipl);
}
endcase
default:
{
bad:
u.u_error = EINVAL;
}
endcase
}
}
/*
 * enTimeout - read timeout handler: mark the open descriptor as timed
 * out and awaken the reader sleeping in enread().
 */
enTimeout(d)
struct enOpenDescriptor * d;
{
	register int s;
#ifdef DEBUG
	enprintf(1)("enTimeout(%x):\n", d);
#endif
	s = spl6();
	d->enOD_RecvState = ENRECVTIMEDOUT;
	wakeup((caddr_t)d);
	splx(s);
}
/*
 * enrstart - start read operation on net
 *
 * Posts a receive: ensures a packet buffer is allocated, remaps it
 * onto the UNIBUS, and starts the receiver with interrupts enabled.
 */
enrstart(ui)
register struct uba_device *ui;
{
register struct enState *enStatep = &enState[ui->ui_unit];
#if DEBUG
enprintf(1)("enrstart(%x):\n", ui);
#endif
/*
 * We are only called when priority is >= 6 or when
 * receiver is inactive (during initialization).
 * So it is safe to go get a free buffer.
 */
if (NULL == enRP)
{
enRP = enAllocatePacket();
}
/* negative word count per UNIBUS convention; enRP/enRinfo/ENADDR are
 * presumably per-unit macros over enStatep (enet.h) -- confirm */
ENADDR->enrwc = -ENPACKETSIZE;
ENADDR->enrba = ubaremap(ui->ui_ubanum, (unsigned)enRinfo, (caddr_t)enRP->enP_Data);
ENADDR->enrcsr = ENCSR_IE|ENCSR_GO;
#if DEBUG
enprintf(1)("enrstarted\n");
#endif
}
/*
 * enxstart - start packet transmission on net
 *
 * If no transmit is in progress, takes the next packet from the
 * transmit queue (waking any writer blocked on a full queue) and
 * starts the transmitter; the backoff mask selects a random delay
 * after collisions.
 */
enxstart(ui)
register struct uba_device *ui;
{
register struct enState *enStatep = &enState[ui->ui_unit];
#ifdef DEBUG
enprintf(1)("enxstart(%x):\n", ui);
#endif
/*
 * Synchronization not needed here because only transmitter
 * touches anything we touch, and only if it is active, in
 * which case we wouldn't be here.
 */
if (NULL == enXP)
{
enXP = (struct enPacket *)dequeue((struct queue *)&enXmitq);
if (enXP == NULL)
return;
/* queue just dropped below the limit: release a blocked writer */
if (enXmitq.enQ_NumQueued-- == ENXPACKETS)
wakeup((caddr_t)&enXmitq);
enMask = -1; /* first try with new pkt */
}
ENADDR->enxwc = -(enXP->enP_WordCount);
ENADDR->enxba = ubaremap(ui->ui_ubanum, (unsigned)enXinfo, (caddr_t)enXP->enP_Data);
/* random retransmit delay, widened as enMask shifts (collision backoff) */
ENADDR->enxdly = enRand & ~enMask;
ENADDR->enxcsr = ENCSR_IE|ENCSR_GO;
#ifdef DEBUG
enprintf(1)("enxstarted\n");
#endif
}
/*
 * enrint - net read interrupt handler
 *
 * On error, counts it and reposts the receive; otherwise computes the
 * received word count, restarts the receiver with a fresh buffer, and
 * hands the packet to enInputDone() for filtering/queueing.
 */
enrint(en)
{
register struct enState *enStatep = &enState[en];
register struct uba_device *ui = endinfo[en];
register struct enPacket *p;
#if DEBUG
enprintf(1)("enrint(%d):\n", en);
#endif
if (NULL == (p=enRP))
{
printf("enet%d: spurious input interrupt\n", en);
return;
}
/*
 * We don't need to do a purge here since the ubaremap() will do one
 * when the next read is started (this works as long as the read
 * is started before passing the packet through the filters where
 * the incomplete buffer might cause the packet to be dropped).
 */
if (ENADDR->enrcsr&ENCSR_ERR)
{
enRerrors++;
enprintf(4)("en%d: bad packet\n", en);
enrstart(ui);
return;
}
enRP = NULL;
/* residual word count -> words received; 01777 masks to the
 * 10-bit hardware count field -- confirm against interface spec */
p->enP_WordCount = (ENPACKETSIZE + ENADDR->enrwc)&01777;
enrstart(ui);
enInputDone(enStatep, p);
}
/*
 * enInputDone - process correctly received packet
 *
 * Runs the packet through every open descriptor's filter (in priority
 * order): queues and refcounts it per accepting descriptor, wakes the
 * reader, and delivers any requested signal/callback.  A descriptor
 * at or above ENHIPRI priority consumes the packet exclusively.
 * Packets accepted by no one are dropped back to the free pool.
 */
enInputDone(enStatep, p)
register struct enState *enStatep;
register struct enPacket *p;
{
register struct enOpenDescriptor *d;
#if DEBUG
enprintf(1)("enInputDone(%x): %x\n", enStatep, p);
#endif
forAllOpenDescriptors(d)
{
if (enFilter(p,d))
{
/* queue only if the descriptor has room; refcount per queue */
if (d->enOD_Waiting.enWQ_NumQueued < d->enOD_Waiting.enWQ_MaxWaiting)
{
enEnWaitQueue(&(d->enOD_Waiting), p);
p->enP_RefCount++;
wakeup((caddr_t)d);
#if DEBUG
enprintf(1)("enInputDone: queued\n");
#endif
}
/* send notification when input packet received */
if (d->enOD_SigNumb)
{
if (d->enOD_SigNumb < NSIG)
{
/* guard against a recycled proc slot (pid must still match) */
if (d->enOD_SigProc->p_pid == d->enOD_SigPid)
psignal(d->enOD_SigProc, d->enOD_SigNumb);
if ((d->enOD_Flag & ENHOLDSIG) == 0)
d->enOD_SigNumb = 0; /* disable signal */
}
else
{
/* SigNumb == NSIG marks a kernel callback instead */
(*(d->enOD_SigFunc))();
}
}
if (d->enOD_OpenFilter.enf_Priority >= ENHIPRI)
break;
}
}
if (p->enP_RefCount == 0) /* this buffer no longer in */
{
enDeallocatePacket(p); /* use; return to free queue */
enRdrops++;
}
else
enRcnt++;
}
/*
 * enxint - net transmit interrupt handler
 *
 * On a collision, widens the backoff mask and retransmits until the
 * mask shifts to zero (comment below says 16 retries), then drops the
 * packet.  On completion the packet's completion function is called if
 * set, else its refcount is dropped, and the next queued packet is
 * started.
 */
enxint(en)
{
register struct enState *enStatep = &enState[en];
register struct uba_device *ui = endinfo[en];
#if DEBUG
enprintf(1)("enxint(%d):\n", en);
#endif
if (NULL == enXP)
{
printf("enet%d: spurious transmit interrupt\n", en);
return;
}
/*
 * We no longer need to do a purge here since the ubaremap()
 * when the next packet is transmitted will do one for us.
 */
if ((ENADDR->enxcsr&ENCSR_ERR))
{
enXerrors++;
enprintf(4)("enet%d: collision\n", en);
if (enMask) /* if < 16 retransmissions */
{
enMask <<= 1; /* ~double delay and try again */
enxstart(ui);
return;
}
else
{ /* give up on this pkt */
enXdrops++;
enprintf(4)("enet%d: dropped packet\n", en);
}
}
else
enXcnt++;
/* completion callback (kernel writes) takes precedence over refcounting */
if (enXP->enP_Func != NULL)
(*(enXP->enP_Func))(enXP);
else if (--(enXP->enP_RefCount) == 0)
enDeallocatePacket(enXP);
enXP = NULL;
enxstart(ui);
}
/*
 * enInitDescriptor - reset an open descriptor to its freshly-opened
 * state: idle receiver, empty filter, no timeout, no signal, and an
 * empty wait queue.  Only the ENKERNEL bit of the open flag survives.
 */
enInitDescriptor(od, oflag)
register struct enOpenDescriptor *od;
{
#if DEBUG
	enprintf(1)("enInitDescriptor(%x):\n", od);
#endif
	od->enOD_Flag = (oflag&ENKERNEL);
	od->enOD_SigNumb = 0;
	od->enOD_Timeout = 0;
	od->enOD_OpenFilter.enf_Priority = 0;
	od->enOD_OpenFilter.enf_FilterLen = 0;
	od->enOD_RecvState = ENRECVIDLE;
	enInitWaitQueue(&(od->enOD_Waiting));
#if DEBUG
	enprintf(1)("=>eninitdescriptor\n");
#endif
}
/* extract the opcode field from a combined op/argument filter word */
#define opx(i) (i>>ENF_NBPA)

/*
 * enFilter - run an open descriptor's packet filter against a packet
 *
 * The filter (d->enOD_OpenFilter) is a short program for a stack machine.
 * Each instruction word holds an argument selector (low ENF_NBPA bits) and
 * an opcode (high ENF_NBPO bits), decoded via the fw bitfield overlay.
 * The argument phase pushes a literal, a zero, a packet data word, or
 * nothing; the opcode phase pops the top of stack and combines it with
 * the new top using a boolean/comparison operator.  The stack starts with
 * TRUE so an empty filter accepts everything.  Returns the final top of
 * stack as the accept/reject verdict; out-of-range packet-word references
 * and stack overflow reject the packet.
 *
 * FIX: the ENF_XOR case read "*sp ā= arg;" -- a character-set garble of
 * the C xor-assignment operator "^=", restored here.
 */
boolean
enFilter(p,d)
struct enPacket *p;
struct enOpenDescriptor *d;
{
	register unsigned short *sp;	/* stack pointer; stack grows downward */
	register unsigned short *fp;	/* next filter word to interpret */
	register unsigned short *fpe;	/* end of filter program */
	register unsigned op;
	register unsigned arg;
	register maxword;		/* 1 + highest valid packet-word selector */
	unsigned short stack[ENMAXFILTERS+1];
	struct fw {unsigned arg:ENF_NBPA, op:ENF_NBPO;};
#ifdef DEBUG
	enprintf(1)("enFilter(%x,%x):\n", p, d);
#endif
	sp = &stack[ENMAXFILTERS];
	maxword = p->enP_WordCount+ENF_PUSHWORD;
	fp = &d->enOD_OpenFilter.enf_Filter[0];
	fpe = &fp[d->enOD_OpenFilter.enf_FilterLen];
	*sp = TRUE;			/* empty filter accepts the packet */
	for (; fp < fpe; )
	{
		op = ((struct fw *)fp)->op;
		arg = ((struct fw *)fp)->arg;
		fp++;
		/* argument phase: push the instruction's operand (if any) */
		switch (arg)
		{
		default:
			/* selectors >= ENF_PUSHWORD index into the packet data */
			if ((arg >= ENF_PUSHWORD)&&(arg < maxword))
				*--sp = p->enP_Data[arg-ENF_PUSHWORD];
			else
			{
#ifdef DEBUG
				enprintf(1)("=>0(len)\n");
#endif
				return(false);	/* reference beyond packet end */
			}
			break;
		case ENF_PUSHLIT:
			/* literal operand follows in the next filter word */
			*--sp = *fp++;
			break;
		case ENF_PUSHZERO:
			*--sp = 0;
			/* fall through */
		case ENF_NOPUSH:
			break;
		}
		if (op == ENF_NOP)
			continue;
		/* binary operators need two stacked operands */
		if (sp > &stack[ENMAXFILTERS-2])
		{
#ifdef DEBUG
			enprintf(1)("=>0(sp)\n");
#endif
			return(false);
		}
		arg = *sp++;		/* pop right operand; result replaces left */
		switch (op)
		{
		default:
#ifdef DEBUG
			enprintf(1)("=>0(def)\n");
#endif
			return(false);	/* unknown opcode rejects */
		case opx(ENF_AND):
			*sp &= arg;
			break;
		case opx(ENF_OR):
			*sp |= arg;
			break;
		case opx(ENF_XOR):
			*sp ^= arg;
			break;
		case opx(ENF_EQ):
			*sp = (*sp == arg);
			break;
		case opx(ENF_LT):
			*sp = (*sp < arg);
			break;
		case opx(ENF_LE):
			*sp = (*sp <= arg);
			break;
		case opx(ENF_GT):
			*sp = (*sp > arg);
			break;
		case opx(ENF_GE):
			*sp = (*sp >= arg);
			break;
		}
	}
#ifdef DEBUG
	enprintf(1)("=>%x\n", *sp);
#endif
	return((boolean)*sp);	/* final top of stack is the verdict */
}
/*
 * enInsertDescriptor - insert open descriptor in queue ordered by priority
 *
 * Walks the (circular, header-terminated) descriptor queue and inserts d
 * in front of the first entry with strictly lower filter priority, so the
 * queue stays sorted highest-priority-first and equal priorities keep FIFO
 * order.  The walk and insertion are done at spl6 to exclude the device
 * interrupt code that traverses the same queue.
 */
enInsertDescriptor(q, d)
register struct enQueue *q;
register struct enOpenDescriptor *d;
{
struct enOpenDescriptor * nxt;
register int ipl;
/* lock out device interrupts while the queue links are inconsistent */
ipl = spl6();
nxt = (struct enOpenDescriptor *)q->enQ_F;
/* stop at the first lower-priority entry, or when the list wraps to the header */
while ((struct Queue *)q != &(nxt->enOD_Link))
{
if (d->enOD_OpenFilter.enf_Priority > nxt->enOD_OpenFilter.enf_Priority)
break;
nxt = (struct enOpenDescriptor *)nxt->enOD_Link.F;
}
/* link d in immediately ahead of nxt */
enqueue((struct queue *)&(nxt->enOD_Link),(struct queue *)&(d->enOD_Link));
enprintf(2)("enID: Desq: %x, %x\n", q->enQ_F, q->enQ_B);
q->enQ_NumQueued++;
splx(ipl);
}
/*
 * enattach - record a newly attached ethernet device
 *
 * Called once per device at autoconfiguration time; saves the device's
 * unit number in the next free slot of the enUnit table.
 */
enattach(ui)
register struct uba_device *ui;
{
	/* next free slot in the unit table; advances once per attach */
	static int enunit = 0;

	enUnit[enunit] = ui->ui_unit;
	enunit++;
}
/*
 * enprobe - autoconfiguration probe routine
 *
 * Forces the device to interrupt so the autoconf machinery can discover
 * its vector: br and cvec are the conventional implicit registers that
 * the interrupt fills in (hence the lint pacification below).  Always
 * claims the device (returns 1).
 */
enprobe(reg)
caddr_t reg;
{
register int br,cvec;
#ifdef lint
br = 0; cvec = br; br = cvec;
#endif
/*
 * This sends a very small garbage packet out on the net. Hope
 * nobody minds.
 */
/* word count -1, buffer address 0, then start transmit with interrupts on */
((struct enreg *)reg)->enxwc = -1;
((struct enreg *)reg)->enxba = 0;
((struct enreg *)reg)->enxcsr = ENCSR_IE|ENCSR_GO;
DELAY(10000);
((struct enreg *)reg)->enxcsr = 0;
/*
 * In case of collision, wrong vector will be generated. Always
 * strip off low 4 bits to get base vector address (this means that
 * the ethernet board must be set to interrupt at a quadlongword
 * address).
 */
cvec &= ~0xf; /* in case of collision interrupt */
return(1);
}
/*
 * enreset - reset the ethernet
 *
 * Called on UBA reset to restart pending I/O.
 *
 * Just prints the device name and restarts the receiver and
 * transmitter. No other state that we care about is lost by
 * the reset.
 */
enreset(uban)
{
	register int unit;
	register struct uba_device *ui;

	for (unit = 0; unit < NEN; unit++)
	{
		ui = endinfo[unit];
		if (ui == 0 || ui->ui_alive == 0)
			continue;		/* not configured */
		if (ui->ui_ubanum != uban)
			continue;		/* on a different UBA */
		printf(" enet%d", unit);
		if (enState[unit].ens_Desq.enQ_F == 0)
			continue;		/* never initialized */
		/*
		 * Normally we would free the UBA resources here. Since we
		 * don't care that the UBA reset has invalidated the mapping
		 * registers (because we remap each ethernet I/O anyway),
		 * we can safely ignore this and continue knowing that we still
		 * have the same mapping registers allocated in the first 64K bytes.
		 */
		enrstart(ui);		/* restart the receiver */
		enxstart(ui);		/* restart the transmitter (if necessary) */
	}
}
#endif